End-to-End automation of the Cardiac Atlas Project, from SSFP MRI to CIM Mesh.
Steps:
# view selection
# Environment setup.
# KMP_DUPLICATE_LIB_OK works around the duplicate-OpenMP-runtime abort that
# occurs when TensorFlow and Torch builds each bundle their own libiomp.
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import sys
# Make the project's ../src/ modules (viewselection, phaseselection,
# landmarklocalization, ...) importable from this notebook.
module_path = os.path.abspath(os.path.join('../src/'))
if module_path not in sys.path:
    sys.path.append(module_path)
import numpy as np
import pandas as pd
import ipywidgets as widgets
import pydicom
from tqdm import tqdm
import cv2
import shutil
import tensorflow as tf
import torch
import nnunet # directly imports local nnunet installation
from nnunet.inference.predict import predict_from_folder
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# Report versions of the key dependencies for reproducibility.
print('Python: {}'.format(sys.version))
print('Pydicom: {}'.format(pydicom.__version__))
print('TensorFlow: {}'.format(tf.__version__))
print('Torch: {}'.format(torch.__version__))
Python: 3.6.15 | packaged by conda-forge | (default, Dec 3 2021, 18:49:41) [GCC 9.4.0] Pydicom: 2.3.0 TensorFlow: 2.4.0 Torch: 1.10.2+cu102
If your device has a gpu and the necessary software installed, tensorflow will be able to detect it and display it here. If not, the entire notebook can be run on the CPU, but will take a significantly longer amount of time, particularly if normalization and test time augmentations are utilized during landmark localization.
# Detect whether TensorFlow can see a GPU. Per the note above, the notebook
# is runnable on CPU (just slower), so a missing GPU is reported as a warning
# rather than aborting with SystemError as before.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    print('GPU device not found - running on CPU; processing will be significantly slower')
else:
    print('Found GPU at: {}'.format(device_name))
Found GPU at: /device:GPU:0
The automated view selection is designed to automatically identify and select MRI views that are relevant to cardiac modeling. The analysis is designed to be run over a directory containing one or multiple patient subdirectories. The code runs the analysis iteratively over each subdirectory.
The suggested directory structures for each approach are shown below:
Run the analysis over each patient in a directory individually (i.e., analysis for patient 1, then analysis for patient 2.)
├── DATA
├── Patient1
│ ├── 1.dcm
│ ├── 2.dcm
│ ├── 3.dcm
│ └── ...
└── Patient2
├── 1.dcm
├── 2.dcm
├── 3.dcm
└── ...
Change the settings below to match the desired patient and correct src, dst, and csv_path.
# PARAMETERS for the analysis
patient = 'CHD1055301'
src = "/home/ubuntu/CAP/CAP-FullAutomation/data/raw/DICOMS Longitudinal/" + patient # PATH to the directory containing the desired DICOM files (str)
dst = "/home/ubuntu/CAP/CAP-FullAutomation/data/processed/" # PATH to the output directory to save dicom files (only valid if save_dicoms = True) (str)
# view selection model
modelname = 'ResNet50' # The neural network to load and used (Options: VGG19, ResNet50, or Xception)
use_multiprocessing = False # Use multiprocessing to read header info (True or False)
# parameters for postprocessing/saving
csv_path = '/home/ubuntu/CAP/CAP-FullAutomation/data/processed/{}/view_predictions.csv'.format(patient) # PATH to save the generated csv file (only valid if create_csv = True) (str)
create_csv = True # Save a .csv file with the series level view predictions (True or False)
save_files = True # Save dicom files to new directory (dst) (True or False)
save_only_desired = False # Save only dicom files corresponding to desired views (True or False)
confidence_value = 0.5 # Only save series if the confidence is > a certain value (set to 0 to save all desired series, regardless of confidence) (float 0-1.0)
# makedirs + exist_ok: creates any missing intermediate directories and avoids
# the race between an exists() check and mkdir() that the original code had.
os.makedirs(os.path.join(dst, patient), exist_ok=True)
# import the view selection scripts
from viewselection import ViewSelection
# Build the view-selection driver from the parameters defined above.
# Argument order matches the ViewSelection constructor signature.
view_selector = ViewSelection(
    src,
    dst,
    modelname,
    csv_path,
    create_csv,
    use_multiprocessing,
    save_files,
    save_only_desired,
    confidence_value
)
# Load the classification network, then run a view prediction over every
# series found under src (results are written to csv_path / dst per settings).
view_selector.load_tensorflow_model()
view_selector.complete_view_prediction()
# the view predictions are saved to the specified csv_path file. We can load in this csv file and
# see what views were predicted for each series in the raw MRI dump.
selected = pd.read_csv(csv_path)
# filter dataframe to desired frames only if desired
# (left disabled: the series selection is instead made interactively via the
# checkbox widget in the next cell)
#selected = selected[selected['Confidence'] > confidence_value]
#selected = selected[selected['Predicted View'].isin(['4CH', '3CH', 'SA', 'LVOT', 'RVOT', '2CH RT', '2CH LT'])]
# display dataframe
selected
| Patient ID | Series ID | Series Number | Frames | Frames Per Slice | Series Description | Predicted View | Confidence | |
|---|---|---|---|---|---|---|---|---|
| 0 | CHD1055301 | 2.16.124.113543.6006.99.0495281801922651729 | 2303 | 140 | 1 | sphase_1 | OTHER | 1.00 |
| 1 | CHD1055301 | 2.16.124.113543.6006.99.04647351462160781493 | 1601 | 90 | 90 | flow_bh_rpa_sense | OTHER | 0.91 |
| 2 | CHD1055301 | 2.16.124.113543.6006.99.06665219082207082022 | 901 | 480 | 30 | sa_sense | SA | 1.00 |
| 3 | CHD1055301 | 2.16.124.113543.6006.99.5063835196956610329 | 2901 | 30 | 3 | psir-tfe_nav_20sl_sense | OTHER | 0.83 |
| 4 | CHD1055301 | 2.16.124.113543.6006.99.04587812860415322866 | 1201 | 90 | 90 | flow_bh_ao_sense | OTHER | 0.98 |
| 5 | CHD1055301 | 2.16.124.113543.6006.99.5071954445621763975 | 2501 | 28 | 1 | 3d_morphology | OTHER | 1.00 |
| 6 | CHD1055301 | 2.16.124.113543.6006.99.06695118164868377905 | 201 | 62 | 1 | survey_fb | OTHER | 1.00 |
| 7 | CHD1055301 | 2.16.124.113543.6006.99.5049920390923095264 | 2302 | 9 | 3 | 3_phase_mra | OTHER | 1.00 |
| 8 | CHD1055301 | 2.16.124.113543.6006.99.7523760588999490154 | 2305 | 140 | 1 | sphase_3 | OTHER | 0.99 |
| 9 | CHD1055301 | 2.16.124.113543.6006.99.04534284476839490030 | 1701 | 90 | 90 | flow_bh_lpa_sense | OTHER | 1.00 |
| 10 | CHD1055301 | 2.16.124.113543.6006.99.06622763533432680474 | 501 | 10 | 1 | bb_-_ax_pa_clear | OTHER | 1.00 |
| 11 | CHD1055301 | 2.16.124.113543.6006.99.5087032377940069636 | 2601 | 11 | 11 | look-locker_delay_sense | SA | 0.55 |
| 12 | CHD1055301 | 2.16.124.113543.6006.99.04598771863932720919 | 1301 | 90 | 90 | flow_bh_pa_sense | OTHER | 1.00 |
| 13 | CHD1055301 | 2.16.124.113543.6006.99.04604028601838123734 | 1001 | 60 | 30 | lvot_sense | 3CH | 0.87 |
| 14 | CHD1055301 | 2.16.124.113543.6006.99.5063183876931479456 | 2801 | 18 | 3 | psir-tfe_nav_20sl_sense | OTHER | 0.83 |
| 15 | CHD1055301 | 2.16.124.113543.6006.99.06652052552521727870 | 301 | 10 | 1 | interactive | OTHER | 0.40 |
| 16 | CHD1055301 | 2.16.124.113543.6006.99.06640870602310540123 | 801 | 120 | 30 | rvot_sense | RVOT | 1.00 |
| 17 | CHD1055301 | 2.16.124.113543.6006.99.06711261797294843784 | 101 | 62 | 1 | survey_bh | OTHER | 1.00 |
| 18 | CHD1055301 | 2.16.124.113543.6006.99.5722133481080187987 | 2304 | 140 | 1 | sphase_2 | OTHER | 1.00 |
| 19 | CHD1055301 | 2.16.124.113543.6006.99.04604608183767446013 | 1101 | 60 | 30 | rt_2ch_sense | 2CH RT | 1.00 |
| 20 | CHD1055301 | 2.16.124.113543.6006.99.04520090814986991264 | 1801 | 5 | 1 | bb_-_ao | RVOT | 0.60 |
| 21 | CHD1055301 | 2.16.124.113543.6006.99.5061626230520684283 | 2701 | 54 | 3 | psir-tfe_nav_20sl_sense | OTHER | 1.00 |
| 22 | CHD1055301 | 2.16.124.113543.6006.99.06674102928867389259 | 701 | 120 | 30 | 4ch_sense | 4CH | 1.00 |
| 23 | CHD1055301 | 2.16.124.113543.6006.99.06620680814855587194 | 601 | 4 | 1 | bb_-_rvot_clear | RVOT | 1.00 |
# generate widget to change selections
# One checkbox per candidate series; the description embeds the series number
# (parsed back out in the next cell), description, predicted view, and confidence.
options = selected[['Series Number', 'Series Description', 'Predicted View', 'Confidence']]
items = []
for i, row in options.iterrows():
    items.append(widgets.Checkbox(
        value=False,
        description='Series {} - {} - {} ({})'.format(row[0], row[1], row[2], row[3]),
        disabled=False,
        indent=False))
grid = widgets.GridBox(items, layout=widgets.Layout(grid_template_columns="repeat(1, 100px)"))
print('Select Desired LAX and SA Series:')
grid
Select Desired LAX and SA Series:
# remove undesired series from processed directory
# Parse the series number back out of each checked checkbox description
# ('Series <number> - ...').
selected_series = []
for item in items:
    if item.value:
        selected_series.append(int(item.description.split(' ')[1]))
print('Selected Series: ', selected_series)
# Delete every DICOM under the patient's processed directory whose series
# number was not selected above.
for (root, subdirs, files) in os.walk(dst + patient):
    for file in files:
        if file.endswith('.dcm'):
            dcm = pydicom.dcmread(os.path.join(root, file))
            if dcm.SeriesNumber not in selected_series:
                os.remove(os.path.join(root, file))
selected = selected[selected['Series Number'].isin(selected_series)]
print('Undesired files removed')
# record number of phases from SAX
# .iloc[0] instead of int(<Series>): int() on a Series is deprecated in pandas
# and fails outright when more than one SA series is selected.
num_phases = int(selected[selected['Predicted View'] == 'SA']['Frames Per Slice'].iloc[0])
selected
Selected Series: [901, 1001, 801, 1101, 701] Undesired files removed
| Patient ID | Series ID | Series Number | Frames | Frames Per Slice | Series Description | Predicted View | Confidence | |
|---|---|---|---|---|---|---|---|---|
| 2 | CHD1055301 | 2.16.124.113543.6006.99.06665219082207082022 | 901 | 480 | 30 | sa_sense | SA | 1.00 |
| 13 | CHD1055301 | 2.16.124.113543.6006.99.04604028601838123734 | 1001 | 60 | 30 | lvot_sense | 3CH | 0.87 |
| 16 | CHD1055301 | 2.16.124.113543.6006.99.06640870602310540123 | 801 | 120 | 30 | rvot_sense | RVOT | 1.00 |
| 19 | CHD1055301 | 2.16.124.113543.6006.99.04604608183767446013 | 1101 | 60 | 30 | rt_2ch_sense | 2CH RT | 1.00 |
| 22 | CHD1055301 | 2.16.124.113543.6006.99.06674102928867389259 | 701 | 120 | 30 | 4ch_sense | 4CH | 1.00 |
If necessary, the predicted views can be corrected using the code below (currently set to Raw NBConvert) in order to limit accidental changes. Enter the series number to change, the currently predicted view, and the actual (or correct) view. The code will copy the dicom files to the correct directory.
The number of frames (or phases) per slice is automatically selected from the SAX stack. In some cases, the number of frames per slice may be different for different views (e.g., 30 frames per slice in the SAX, but only 20 frames per slice in the 4CH view). If this is the case, be sure to appropriately change the number of phases when performing landmark localization predictions below.
Similarly, if a dicom file is missing, the number of frames per slice may be inaccurately low. For example, with one dicom missing, the number of frames might be set to 29, when it should actually be 30. Be sure to manually correct this error if it occurs.
Now that the views have been selected, we can export the slice info file for use with the BiV Modelling v2 code. The first code generates a pandas dataframe of this information (used throughout this notebook) while the second code exports it to a txt file.
# generate slice info file for use with python CIM
sliceID = 0
out = []
# Iterate over the per-view subdirectories (SA, 4CH, ...) of the patient folder.
# os.path.isdir is more robust than the previous "'.' not in subdir" heuristic,
# which broke for any directory name containing a dot.
for subdir in os.listdir(os.path.join(dst, patient)):
    if os.path.isdir(os.path.join(dst, patient, subdir)):
        stored_locs = []
        for file in os.listdir(os.path.join(dst, patient, subdir)):
            if file.endswith('.dcm'):
                dcm = pydicom.dcmread(os.path.join(dst, patient, subdir, file))
                SOPInstanceUID = str(dcm.SOPInstanceUID)
                rows = dcm.Rows
                cols = dcm.Columns
                imagePositionPatient = dcm.ImagePositionPatient
                imageOrientationPatient = dcm.ImageOrientationPatient
                pixelSpacing = dcm.PixelSpacing
                if subdir == '4CH':
                    # keep a reference to a 4CH dataset (presumably used later - TODO confirm)
                    dcm_t = dcm
                # Record each distinct slice location only once, so there is
                # one row per slice rather than one per phase.
                if dcm.SliceLocation not in stored_locs:
                    row = [sliceID, file, subdir, dcm.SliceLocation, [rows, cols], imagePositionPatient, imageOrientationPatient, pixelSpacing]
                    out.append(row)
                    stored_locs.append(dcm.SliceLocation)
                    sliceID += 1
# generate dataframe
slice_info_df = pd.DataFrame(out, columns = ['Slice ID', 'File', 'View', 'Slice Location', 'Size', 'ImagePositionPatient', 'ImageOrientationPatient', 'Pixel Spacing'])
# generate mapping dictionary (short-axis slice to slice id) for future use:
# maps each SA slice's global Slice ID to its 0-based position when the SA
# slices are ordered by slice location.
sa_df_sorted = slice_info_df[slice_info_df['View']=='SA'].sort_values('Slice Location', axis=0)
sa_mapping_dict = {}
for i, (index, row) in enumerate(sa_df_sorted.iterrows()):
    sa_mapping_dict[row['Slice ID']] = i
# display slice info
slice_info_df
| Slice ID | File | View | Slice Location | Size | ImagePositionPatient | ImageOrientationPatient | Pixel Spacing | |
|---|---|---|---|---|---|---|---|---|
| 0 | 0 | MR.2.16.124.113543.6006.99.0664087060231054012... | RVOT | 0.000000 | [256, 256] | [105.002112731337, -124.14129754155, 184.70926... | [-0.5386930108070, 0.84117394685745, -0.047288... | [1.484375, 1.484375] |
| 1 | 1 | MR.2.16.124.113543.6006.99.0664087060231054012... | RVOT | 21.000000 | [256, 256] | [87.3182505220174, -135.46739693731, 184.68710... | [-0.5386930108070, 0.84117394685745, -0.047288... | [1.484375, 1.484375] |
| 2 | 2 | MR.2.16.124.113543.6006.99.0664087060231054012... | RVOT | 14.000000 | [256, 256] | [93.2128714174032, -131.69203110784, 184.69449... | [-0.5386930108070, 0.84117394685745, -0.047288... | [1.484375, 1.484375] |
| 3 | 3 | MR.2.16.124.113543.6006.99.0664087060231054012... | RVOT | 6.999999 | [256, 256] | [99.1074921935796, -127.91666337102, 184.70188... | [-0.5386930108070, 0.84117394685745, -0.047288... | [1.484375, 1.484375] |
| 4 | 4 | MR.2.16.124.113543.6006.99.0460402860183812373... | 3CH | 0.000000 | [256, 256] | [-165.42910549230, -158.93103365600, 97.460791... | [0.89826780557632, -0.0896837934851, -0.430199... | [1.484375, 1.484375] |
| 5 | 5 | MR.2.16.124.113543.6006.99.0460402860183812373... | 3CH | 8.000001 | [256, 256] | [-161.91457531042, -157.27283625304, 104.45353... | [0.89826780557632, -0.0896837934851, -0.430199... | [1.484375, 1.484375] |
| 6 | 6 | MR.2.16.124.113543.6006.99.0667410292886738925... | 4CH | 15.999999 | [256, 256] | [-201.57067303359, -177.33489755541, -106.1322... | [0.98218160867691, 0.02390478178858, -0.186407... | [1.484375, 1.484375] |
| 7 | 7 | MR.2.16.124.113543.6006.99.0667410292886738925... | 4CH | 7.999999 | [256, 256] | [-202.87507586181, -172.52188257128, -112.3878... | [0.98218160867691, 0.02390478178858, -0.186407... | [1.484375, 1.484375] |
| 8 | 8 | MR.2.16.124.113543.6006.99.0667410292886738925... | 4CH | 24.000000 | [256, 256] | [-200.26627020537, -182.14791444689, -99.87653... | [0.98218160867691, 0.02390478178858, -0.186407... | [1.484375, 1.484375] |
| 9 | 9 | MR.2.16.124.113543.6006.99.0667410292886738925... | 4CH | 0.000000 | [256, 256] | [-204.17947866022, -167.70886567980, -118.6435... | [0.98218160867691, 0.02390478178858, -0.186407... | [1.484375, 1.484375] |
| 10 | 10 | MR.2.16.124.113543.6006.99.0460460818376744601... | 2CH RT | 0.000000 | [256, 256] | [-189.61446161568, 98.8013261556625, 174.82972... | [0.96094638109207, -0.2767346203327, -2.385309... | [1.484375, 1.484375] |
| 11 | 11 | MR.2.16.124.113543.6006.99.0460460818376744601... | 2CH RT | 8.000000 | [256, 256] | [-187.51418085396, 106.094442725181, 172.30004... | [0.96094638109207, -0.2767346203327, -2.385309... | [1.484375, 1.484375] |
| 12 | 12 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 28.000001 | [256, 256] | [105.281876504421, -147.36656352877, 154.51231... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 13 | 13 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 70.000002 | [256, 256] | [69.9733285307884, -133.50171896815, 172.54208... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 14 | 14 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 49.000001 | [256, 256] | [87.6276025176048, -140.43414160609, 163.52719... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 15 | 15 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 0.000000 | [256, 256] | [128.820908486843, -156.60979315638, 142.49246... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 16 | 16 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 14.000001 | [256, 256] | [117.051392495632, -151.98817846179, 148.50239... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 17 | 17 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 42.000001 | [256, 256] | [93.5123605132103, -142.74494883418, 160.52223... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 18 | 18 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 98.000001 | [256, 256] | [46.4342970252037, -124.25849005579, 184.56192... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 19 | 19 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 7.000002 | [256, 256] | [122.936150491237, -154.29898592829, 145.49743... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 20 | 20 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 56.000001 | [256, 256] | [81.7428445219993, -138.12333342432, 166.53215... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 21 | 21 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 77.000001 | [256, 256] | [64.0885705351829, -131.19091078639, 175.54704... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 22 | 22 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 91.000001 | [256, 256] | [52.3190548717975, -126.56929633021, 181.55696... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 23 | 23 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 63.000001 | [256, 256] | [75.8580865263938, -135.81252619624, 169.53712... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 24 | 24 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 105.000001 | [256, 256] | [40.5495385527610, -121.94768187403, 187.56688... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 25 | 25 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 84.000001 | [256, 256] | [58.2038130164146, -128.88010451197, 178.55200... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 26 | 26 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 35.000000 | [256, 256] | [99.3971185088157, -145.05575606226, 157.51727... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
| 27 | 27 | MR.2.16.124.113543.6006.99.0666521908220708202... | SA | 21.000000 | [256, 256] | [111.166634500026, -149.67737093567, 151.50735... | [0.22095948457717, 0.93282639980316, -0.284625... | [1.484375, 1.484375] |
# write to slice info file
# One tab-separated line per slice:
# <file>\tframeID: <id>\ttimeFrame\t1\tImagePositionPatient\t<x>\t<y>\t<z>\t
# ImageOrientationPatient\t<6 direction cosines>\tPixel Spacing\t<row>\t<col>
with open(os.path.join(dst, patient, 'SliceInfo.txt'), 'w') as f:
    for i, row in slice_info_df.iterrows():
        sliceID = row['Slice ID']
        file = row['File']
        imagePositionPatient = row['ImagePositionPatient']
        imageOrientationPatient = row['ImageOrientationPatient']
        pixelSpacing = row['Pixel Spacing']
        f.write('{}\t'.format(file))
        f.write('frameID: {}\t'.format(sliceID))
        # timeFrame is always written as 1 (single time frame per entry)
        f.write('timeFrame\t1\t')
        f.write('ImagePositionPatient\t')
        f.write('{}\t{}\t{}\t'.format(imagePositionPatient[0], imagePositionPatient[1], imagePositionPatient[2]))
        f.write('ImageOrientationPatient\t')
        f.write('{}\t{}\t{}\t{}\t{}\t{}\t'.format(imageOrientationPatient[0], imageOrientationPatient[1], imageOrientationPatient[2],
                                                  imageOrientationPatient[3], imageOrientationPatient[4], imageOrientationPatient[5]))
        f.write('Pixel Spacing\t')
        f.write('{}\t{}\n'.format(pixelSpacing[0], pixelSpacing[1]))
The following code performs end-systolic phase selection from the short-axis stack of images selected above. The end-systolic phase is assumed to be 0 (may not necessarily be the case in all scenarios, but has been for all of the cases processed to date).
The model performs an ES phase prediction over each short-axis slice, then averages the predictions to produce a final result.
# import the phase selection module
from phaseselection import PhaseSelection
# initialize the phase selector with the list of short-axis images (unordered) and series IDs
phaseSelector = PhaseSelection( os.path.join(dst, patient),
                                slice_info_df,
                                'SA' )
# initialize the tensorflow model and inputs
phaseSelector.load_tensorflow_model()
phaseSelector.generate_mapping_dictionary()
phaseSelector.generate_complete_volume()
# make phase predictions (end-systolic phase index within the SA stack)
es_phase = phaseSelector.predict_phase()
# add prediction to dataframe loaded from csv previously
# (only the SA row receives a value; the other views keep NaN)
selected.loc[selected['Predicted View'] == 'SA', ['ES Phase Prediction']] = es_phase
# save this dataframe to a new csv file
selected.to_csv('../data/processed/{}/phase_predictions.csv'.format(selected.iloc[0]['Patient ID']))
# number of phases in SAX stack
# Normalized ES phase (fraction of the cardiac cycle); multiplied back by a
# view's own phase count later, so views with different phase counts work.
es_phase_float = es_phase / num_phases # enables support for patients with differing number of phases per view
selected
100%|██████████| 11/11 [00:12<00:00, 1.11s/it]
| Patient ID | Series ID | Series Number | Frames | Frames Per Slice | Series Description | Predicted View | Confidence | ES Phase Prediction | |
|---|---|---|---|---|---|---|---|---|---|
| 2 | CHD1055301 | 2.16.124.113543.6006.99.06665219082207082022 | 901 | 480 | 30 | sa_sense | SA | 1.00 | 12.0 |
| 13 | CHD1055301 | 2.16.124.113543.6006.99.04604028601838123734 | 1001 | 60 | 30 | lvot_sense | 3CH | 0.87 | NaN |
| 16 | CHD1055301 | 2.16.124.113543.6006.99.06640870602310540123 | 801 | 120 | 30 | rvot_sense | RVOT | 1.00 | NaN |
| 19 | CHD1055301 | 2.16.124.113543.6006.99.04604608183767446013 | 1101 | 60 | 30 | rt_2ch_sense | 2CH RT | 1.00 | NaN |
| 22 | CHD1055301 | 2.16.124.113543.6006.99.06674102928867389259 | 701 | 120 | 30 | 4ch_sense | 4CH | 1.00 | NaN |
In the next step, we will select the slices of the SA stack that will be useable for cardiac modeling (typically spanning from the apex to the base).
To improve consistency of the validation between the automated pipeline and the manually generated models, I also incorporated code to automatically load the slice info file from manually generated cases, and display which short-axis slices were selected. If you do not have the manual slice info files for your cases, simply skip this cell block.
# process manual slice info file
# path to patient directory of manually generated cases
path = '../data/raw/LongitudinalGPT/' + patient
# open slice info file
with open(os.path.join(path, 'SliceInfo.txt'), 'r') as f:
    lines = f.readlines()
# Stateful line-by-line parse:
#  - a line containing 'SOPInstanceUID' carries the dicom file name and view
#    (space-separated: <tag> <file> <view>);
#  - the line immediately AFTER a line containing 'ImagePositionPatient'
#    carries the x y z position. 'flag' marks that the next line is a position.
flag = False
out = []
for line in lines:
    if 'SOPInstanceUID' in line:
        _, dcm, view = line.split(' ')
        view = view.rstrip('\n')
    if flag == True:
        x,y,z = line.split(' ')
        z = z.rstrip('\n')
        flag = False
        # one record per manual slice: [view, file name, [x, y, z] as strings]
        out.append([view, dcm, [x, y, z]])
    if 'ImagePositionPatient' in line:
        flag = True
# generate dataframe of manual information
manual_df = pd.DataFrame(out, columns=['View', 'File', 'Pos'])
# Determine which automated slice IDs appear in the manual selection by
# matching the first (x) coordinate of ImagePositionPatient, rounded to 2dp.
manual_selected = [np.round(float(pos[0]), 2) for pos in list(manual_df['Pos'])]
manual_selected_ids = []
for _, row in slice_info_df.iterrows():
    if row['View'] != 'SA':
        continue
    if np.round(row['ImagePositionPatient'][0], 2) in manual_selected:
        manual_selected_ids.append(int(row['Slice ID']))
print('Manually Selected SAX IDs: ', manual_selected_ids)
manual_df
Manually Selected SAX IDs: [12, 13, 16, 17, 20]
| View | File | Pos | |
|---|---|---|---|
| 0 | SA | CAP_CHD1055301_MR_SA_SENSE_hrt_raw_20190304193... | [69.973329, -133.501719, 172.542084] |
| 1 | SA | CAP_CHD1055301_MR_SA_SENSE_hrt_raw_20190304193... | [117.051392, -151.988178, 148.502392] |
| 2 | SA | CAP_CHD1055301_MR_SA_SENSE_hrt_raw_20190304193... | [105.281877, -147.366564, 154.512314] |
| 3 | SA | CAP_CHD1055301_MR_SA_SENSE_hrt_raw_20190304193... | [93.512361, -142.744949, 160.522236] |
| 4 | SA | CAP_CHD1055301_MR_SA_SENSE_hrt_raw_20190304193... | [81.742845, -138.123333, 166.532158] |
| 5 | LA | CAP_CHD1055301_MR_4CH_SENSE__hrt_raw_201903041... | [-204.179479, -167.708866, -118.643543] |
| 6 | LA | CAP_CHD1055301_MR_RVOT_SENSE_hrt_raw_201903041... | [93.212871, -131.692031, 184.694495] |
| 7 | LA | CAP_CHD1055301_MR_LVOT_SENSE_hrt_raw_201903041... | [-165.429105, -158.931034, 97.460792] |
| 8 | LA | CAP_CHD1055301_MR_RT_2CH_SENSE_hrt_raw_2019030... | [-187.514181, 106.094443, 172.300048] |
# load the slice selection model
MODELPATH = '../models/SliceSelection/slice_selection.hdf5'
model = tf.keras.models.load_model(MODELPATH)
# find SAX slice IDs
sax_ids = slice_info_df[slice_info_df['View'] == 'SA']['Slice ID']
min_sax_id = np.min(sax_ids)
max_sax_id = np.max(sax_ids)
slice_selection_preds = []
# Run the model over every SAX slice at every phase.
# NOTE: the upper bound is max_sax_id + 1 - the original range(min, max)
# silently skipped the last SAX slice, so it never received a prediction.
for i in range(min_sax_id, max_sax_id + 1):
    for phase in range(phaseSelector.volume.shape[1]):
        image = phaseSelector.volume[i, phase, :, :, 0]
        # scale to 0-255 uint8, resize to the model's 224x224 input,
        # and replicate the grayscale channel to RGB
        img = np.expand_dims(image, -1)
        img = img/np.max(img)
        img = img*255
        img = img.astype(np.uint8)
        img = cv2.resize(img, (224,224))
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        # make prediction (argmax over the class scores = accept/reject label)
        pred = model.predict(np.expand_dims(img, 0))
        # .iloc[0]: calling float() on a Series is deprecated in pandas
        location = float(slice_info_df[slice_info_df['Slice ID'] == i]['Slice Location'].iloc[0])
        slice_selection_preds.append([phase, i, location, np.argmax(pred)])
The code below organizes the predictions made by the slice selection model, and incorporates manual labels if available.
# create dataframe of predictions and record slice locations
preds_df = pd.DataFrame(slice_selection_preds, columns=['Phase', 'Slice ID', 'Location', 'Prediction'])
slice_locations = np.sort([float(x) for x in preds_df['Location'].unique()])
# If the manual slice-info cell above was skipped (as the text allows when no
# manual files exist), fall back to an empty list so the 'Manual Label' column
# reads 'Not available' instead of raising a NameError.
if 'manual_selected_ids' not in globals():
    manual_selected_ids = []
# iterate through predicted locations, recording which locations meet acceptance criteria
good_locations = []
report = []
for loc in slice_locations:
    loc_df = preds_df[preds_df['Location'] == loc]
    if np.mean(loc_df['Prediction']) > 0.75: # acceptance criteria (>75% of images at this location predicted as acceptable)
        label = 'Good'
    else:
        label = 'Bad'
    # reuse loc_df instead of re-filtering preds_df for the same location
    slice_id = int(loc_df.iloc[0]['Slice ID'])
    # check if manual slice info file was loaded
    if manual_selected_ids:
        manual_label = 'Good' if slice_id in manual_selected_ids else 'Bad'
    else:
        manual_label = 'Not available'
    report.append([slice_id, np.round(loc, 1), np.mean(loc_df['Prediction']), label, manual_label])
# generate final dataframe and display
sa_final_df = pd.DataFrame(report, columns=['Slice ID', 'Slice Location', 'Pred Value', 'Pred Label', 'Manual Label'])
sa_final_df
| Slice ID | Slice Location | Pred Value | Pred Label | Manual Label | |
|---|---|---|---|---|---|
| 0 | 15 | 0.0 | 0.000000 | Bad | Bad |
| 1 | 19 | 7.0 | 0.033333 | Bad | Bad |
| 2 | 16 | 14.0 | 0.500000 | Bad | Good |
| 3 | 12 | 28.0 | 0.900000 | Good | Good |
| 4 | 26 | 35.0 | 1.000000 | Good | Bad |
| 5 | 17 | 42.0 | 1.000000 | Good | Good |
| 6 | 14 | 49.0 | 1.000000 | Good | Bad |
| 7 | 20 | 56.0 | 1.000000 | Good | Good |
| 8 | 23 | 63.0 | 1.000000 | Good | Bad |
| 9 | 13 | 70.0 | 0.633333 | Bad | Good |
| 10 | 21 | 77.0 | 0.000000 | Bad | Bad |
| 11 | 25 | 84.0 | 0.000000 | Bad | Bad |
| 12 | 22 | 91.0 | 0.000000 | Bad | Bad |
| 13 | 18 | 98.0 | 0.000000 | Bad | Bad |
| 14 | 24 | 105.0 | 0.000000 | Bad | Bad |
# Display each slice location, with associated label.
# If manually selected cases are available, these labels are automatically used
n_rows = int(np.ceil(len(slice_locations)/4))
fig, ax = plt.subplots(n_rows, 4, figsize=(10,10))
# Iterate over the computed row count; the original hard-coded range(4), which
# silently dropped slices whenever there were more than 16 slice locations.
for i in range(n_rows):
    for j in range(4):
        idx = i*4 + j
        if idx > len(slice_locations)-1:
            break
        # select row and slice id
        row_df = sa_final_df.iloc[idx]
        slice_id = row_df['Slice ID']
        # the image from the ES phase is automatically selected
        # (int() guards against es_phase arriving as a float)
        ax[i,j].imshow(phaseSelector.volume[slice_id, int(es_phase), :, :, 0], cmap='gray')
        # prefer the manual labels when available, otherwise the model's prediction
        if manual_selected_ids:
            accept = slice_id in manual_selected_ids
        else:
            accept = row_df['Pred Label'] == 'Good'
        if accept:
            ax[i,j].set_title('{} - Accept'.format(int(row_df['Slice Location'])))
            ax[i,j].title.set_color('green')
        else:
            ax[i,j].set_title('{}'.format(int(row_df['Slice Location'])))
        # format plot
        ax[i,j].set_xticks([])
        ax[i,j].set_yticks([])
plt.show()
Manually selected SAX slices typically range from base to apex, with every other slice selected. However, both the segmentation and landmark localization models struggle on the most apical and basal slices. Consequently, these slices are optimally excluded from the automated pipeline. In almost all cases I processed, the most basal manual SAX slice was too far basal for use in the automated pipeline. Basal slices that transect the valve planes and/or right ventricular outflow tract should be excluded.
I typically select 3 to 4 SAX slices, ranging from just below the valve planes to the apex. I select every other slice, similar to the manual cases.
# make widget with sa slice options
# A slice's checkbox starts checked exactly when it appears in the manual
# selection; the two near-identical Checkbox constructions are collapsed into
# one with a computed initial value.
items = []
for i, row in sa_final_df.iterrows():
    items.append(widgets.Checkbox(
        value=row['Slice ID'] in manual_selected_ids,
        description='Slice {} - {}'.format(i, float(row['Slice Location'])),
        disabled=False,
        indent=False))
grid = widgets.GridBox(items, layout=widgets.Layout(grid_template_columns="repeat(1, 100px)"))
grid
# remove undesired SA series
# Collect the slice locations whose checkbox was ticked; the location is the
# last space-separated token of each checkbox description ('Slice <i> - <loc>').
good_locations = [float(item.description.split(' ')[-1]) for item in items if item.value]
good_locations
[28.0, 42.0, 56.0]
The following code can be used to perform landmark localization. The points that are predicted are the following:
The RV insertion points from the SAX stack are typically fairly accurate; consequently, normalization and test time augmentations (both of which significantly increase required processing time) may not be necessary. For the 4CH, 3CH, and RVOT views, I recommend including both normalization and at least 5 test time augmentations.
Predict the RV insertion points from the SAX stack.
# convert selected slice locations into slice IDs
keep_mask = sa_final_df['Slice Location'].isin(good_locations)
selected_sa_ids = list(sa_final_df[keep_mask]['Slice ID'])
# show the retained rows for review
sa_final_df[keep_mask]
| Slice ID | Slice Location | Pred Value | Pred Label | Manual Label | |
|---|---|---|---|---|---|
| 3 | 12 | 28.0 | 0.9 | Good | Good |
| 5 | 17 | 42.0 | 1.0 | Good | Good |
| 7 | 20 | 56.0 | 1.0 | Good | Good |
from landmarklocalization import LandmarkLocalization
# Initiate a landmark localization module in the short-axis view
ShortAxisLoc = LandmarkLocalization( os.path.join(dst, patient), slice_info_df, 'SA' )
# load model
ShortAxisLoc.load_tensorflow_model()
# Settings
ShortAxisLoc.num_phases = num_phases # calculated from view selections
ShortAxisLoc.test_time_augmentations = False # Use test time augmentations to improve predictions (often not necessary)
ShortAxisLoc.normalize_predictions = True # Smooth predictions throughout cardiac cycle
ShortAxisLoc.number_of_test_augmentations = 5 # Number of test-time predictions
# Generate Input
ShortAxisLoc.generate_mapping_dictionary()
ShortAxisLoc.generate_complete_volume()
# Make landmark predictions for ED phase
ShortAxisLoc.predict_landmarks(prediction_phase=0)
ed_output = ShortAxisLoc.output
# Make landmark predictions for ES phase
# NOTE(review): es_phase_float appears to be the ES phase as a fraction of the
# cardiac cycle; int() truncates it to a frame index — confirm truncation
# (rather than rounding) is the intended mapping.
ShortAxisLoc.predict_landmarks(prediction_phase=int(es_phase_float * ShortAxisLoc.num_phases) )
es_output = ShortAxisLoc.output
# Store outputs and display inputs/predictions for review.
# Each entry of ed_output is (slice_id, view, time_frame, RV1, RV2); a landmark
# is a [row, col] pair or None when the model made no prediction.
volume = ShortAxisLoc.volume
cols = 4
rows = int(np.ceil(len(selected_sa_ids) / cols))
plt.figure(figsize=(16, 4 * rows))
count = 1
for entry in ed_output:  # avoid shadowing the builtin `slice`
    slice_id = entry[0]
    if slice_id in selected_sa_ids:
        p1 = entry[3]
        p2 = entry[4]
        image = volume[slice_id, 0, :, :, 0]  # phase 0 == end-diastole
        ax = plt.subplot(rows, cols, count)
        ax.imshow(image, cmap='gray')
        # `is not None` instead of `!= None`: the equality form breaks if a
        # landmark is ever an ndarray (elementwise comparison).
        # scatter expects x=column, y=row, hence the index swap.
        if p1 is not None:
            ax.scatter(x=p1[1], y=p1[0])
        if p2 is not None:
            ax.scatter(x=p2[1], y=p2[0])
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title(slice_id)
        count += 1
plt.suptitle('End-Diastolic Short-Axis RV Inserts')
plt.show()
# Same review figure for the end-systolic frame of each selected SA slice.
plt.figure(figsize=(16, 4 * rows))
count = 1
es_frame = int(es_phase_float * ShortAxisLoc.num_phases)  # ES frame index
for entry in es_output:  # avoid shadowing the builtin `slice`
    slice_id = entry[0]
    if slice_id in selected_sa_ids:
        p1 = entry[3]
        p2 = entry[4]
        image = volume[slice_id, es_frame, :, :, 0]
        ax = plt.subplot(rows, cols, count)
        ax.imshow(image, cmap='gray')
        # `is not None` instead of `!= None` (robust if a landmark is an array);
        # scatter expects x=column, y=row, hence the index swap.
        if p1 is not None:
            ax.scatter(x=p1[1], y=p1[0])
        if p2 is not None:
            ax.scatter(x=p2[1], y=p2[0])
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title(slice_id)
        count += 1
plt.suptitle('End-Systolic Short-Axis RV Inserts')
plt.show()
# store the RV insert predictions for both the ED and ES phases
sa_landmark_cols = ['Slice ID', 'View', 'Time Frame', 'RV1', 'RV2']
# pd.concat replaces DataFrame.append, which is deprecated and removed in
# pandas >= 2.0; behavior (row-wise stacking) is identical.
sa_df = pd.concat([pd.DataFrame(ed_output, columns=sa_landmark_cols),
                   pd.DataFrame(es_output, columns=sa_landmark_cols)])
# Keep only the slices chosen in the selection widget above.
sa_df = sa_df[sa_df['Slice ID'].isin(selected_sa_ids)]
sa_df.reset_index(inplace=True, drop=True)
sa_df
| Slice ID | View | Time Frame | RV1 | RV2 | |
|---|---|---|---|---|---|
| 0 | 12 | SA | 0 | [107, 107] | [129, 120] |
| 1 | 17 | SA | 0 | [108, 108] | [135, 115] |
| 2 | 20 | SA | 0 | [107, 107] | [138, 115] |
| 3 | 12 | SA | 12 | [110, 99] | [123, 114] |
| 4 | 17 | SA | 12 | [110, 107] | [127, 109] |
| 5 | 20 | SA | 12 | [107, 105] | [133, 107] |
Predict the MV inserts, TV inserts, and LV apex from the 4CH view. Select only one 4CH slice for inclusion in the final model.
For the landmark localization models, cardiac orientation matters. These models were trained on images with the LV apex pointed towards the top right corner of the image, with the RV (and tricuspid valve) located above the LV (the standard orientation). If this is not the case for your images, the provided settings "flip_ud" (flip image up/down) and "flip_lr" (flip image left/right) can be used to achieve the correct orientation. This is necessary to achieve usable predictions if your orientation is incorrect.
# Initiate a landmark localization module in the four chamber view
FourChamberLoc = LandmarkLocalization( os.path.join(dst, patient), slice_info_df, '4CH' )
# Load tensorflow model and inputs
FourChamberLoc.load_tensorflow_model()
# Settings
FourChamberLoc.num_phases = num_phases # Change number of phases if necessary
# Flip flags: set so the LV apex points to the top-right and the RV sits above
# the LV, matching the orientation the landmark models were trained on.
flip_ud = False
flip_lr = False
FourChamberLoc.test_time_augmentations = True # Use test time augmentations to improve predictions (often not necessary)
FourChamberLoc.normalize_predictions = True # Smooth predictions throughout cardiac cycle
FourChamberLoc.number_of_test_augmentations = 5 # Number of test-time predictions
FourChamberLoc.flip_ud = flip_ud
FourChamberLoc.flip_lr = flip_lr
# Generate Inputs
FourChamberLoc.generate_mapping_dictionary()
FourChamberLoc.generate_complete_volume()
# Make predictions for ED phase
FourChamberLoc.predict_landmarks(prediction_phase=0)
ed_output = FourChamberLoc.output
# Make predictions for ES phase (fractional ES phase truncated to a frame index)
FourChamberLoc.predict_landmarks(prediction_phase=int(es_phase_float * FourChamberLoc.num_phases) )
es_output = FourChamberLoc.output
# display predictions
# Each entry of ed_output is (slice_id, view, time_frame, MV1, MV2, TV1, TV2,
# LVA); a landmark is a [row, col] pair or None when no prediction was made.
volume = FourChamberLoc.volume
cols = 4
rows = int(np.ceil(len(ed_output) / cols))
plt.figure(figsize=(16, 4 * rows))
for i, entry in enumerate(ed_output):  # avoid shadowing the builtin `slice`
    slice_id = entry[0]
    landmarks = entry[3:8]  # MV1, MV2, TV1, TV2, LVA
    image = volume[slice_id, 0, :, :, 0]  # phase 0 == end-diastole
    # Mirror the image the same way the model input was mirrored so the
    # predicted points line up with the displayed image.
    if flip_ud:
        image = np.flipud(image)
    elif flip_lr:
        image = np.fliplr(image)
    ax = plt.subplot(rows, cols, i + 1)
    ax.set_title(slice_id)
    ax.imshow(image, cmap='gray')
    # Explicit None check instead of the original bare try/except, which
    # would also have hidden genuine plotting errors.
    for p in landmarks:
        if p is not None:
            ax.scatter(x=p[1], y=p[0])
plt.suptitle('End-Diastolic Four Chamber')
plt.show()
# Same review figure for the end-systolic frame of the four-chamber views.
cols = 4
rows = int(np.ceil(len(es_output) / cols))
plt.figure(figsize=(16, 4 * rows))
es_frame = int(es_phase_float * FourChamberLoc.num_phases)  # ES frame index
for i, entry in enumerate(es_output):  # avoid shadowing the builtin `slice`
    slice_id = entry[0]
    landmarks = entry[3:8]  # MV1, MV2, TV1, TV2, LVA
    image = volume[slice_id, es_frame, :, :, 0]
    # Mirror the image the same way the model input was mirrored so the
    # predicted points line up with the displayed image.
    if flip_ud:
        image = np.flipud(image)
    elif flip_lr:
        image = np.fliplr(image)
    ax = plt.subplot(rows, cols, i + 1)
    ax.set_title(slice_id)
    ax.imshow(image, cmap='gray')
    # Explicit None check instead of the original bare try/except, which
    # would also have hidden genuine plotting errors.
    for p in landmarks:
        if p is not None:
            ax.scatter(x=p[1], y=p[0])
plt.suptitle('End-Systolic Four Chamber')
plt.show()
# SELECT ONE 4CH SLICE FOR USE IN THE MODEL
# One (initially unchecked) checkbox per available four-chamber slice.
options = list(slice_info_df[slice_info_df['View'] == '4CH']['Slice ID'])
items = []
for sid in options:
    items.append(widgets.Checkbox(
        value=False,
        description='Slice ID {}'.format(sid),
        disabled=False,
        indent=False))
grid = widgets.GridBox(items, layout=widgets.Layout(grid_template_columns="repeat(1, 100px)"))
grid
# record selected slice and landmarks
# Selected slice IDs are parsed back out of the checkbox descriptions
# ("Slice ID <id>").
selected_4ch_locations = [
    int(box.description.split(' ')[-1]) for box in items if box.value
]
four_ch_cols = ['Slice ID', 'View', 'Time Frame', 'MV1', 'MV2', 'TV1', 'TV2', 'LVA']
# pd.concat replaces DataFrame.append, which is deprecated and removed in
# pandas >= 2.0; behavior (row-wise stacking) is identical.
four_chamber_df = pd.concat([
    pd.DataFrame(ed_output, columns=four_ch_cols),
    pd.DataFrame(es_output, columns=four_ch_cols),
])
four_chamber_df = four_chamber_df[four_chamber_df['Slice ID'].isin(selected_4ch_locations)]
four_chamber_df.reset_index(inplace=True, drop=True)
four_chamber_df
| Slice ID | View | Time Frame | MV1 | MV2 | TV1 | TV2 | LVA | |
|---|---|---|---|---|---|---|---|---|
| 0 | 9 | 4CH | 0 | [136, 127] | [154, 136] | [131, 129] | [116, 116] | [120, 174] |
| 1 | 9 | 4CH | 12 | [135, 129] | [149, 141] | [130, 129] | [115, 120] | [118, 170] |
If the landmark localization predictions are incorrect, they can be manually corrected using the Annotate module. Ideally, this tool will be improved in future versions of this notebook.
Referencing the landmark dataframe above, enter the index number (0 or 1), slice ID, and landmarks to correct. The Annotate class will open a matplotlib figure of the view / slice ID selected. Draw a line on the image across the valve plane by clicking twice, once at each valve insertion point. Once the line has been drawn, save the correct valve insertion points by running annotator.update() in the cell block below. The 4CH views are typically accurate; however, the AV and PV insertion points below may need to be corrected.
Predict MV insertions and AV insertions from the 3CH view.
# Initiate a landmark localization module in the three chamber view
ThreeChamberLoc = LandmarkLocalization( os.path.join(dst, patient) , slice_info_df, '3CH' )
# Load tensorflow model and inputs
ThreeChamberLoc.load_tensorflow_model()
# Settings
ThreeChamberLoc.num_phases = num_phases # Change number of phases if necessary
# Flip flags: set so the LV apex points to the top-right and the RV sits above
# the LV, matching the orientation the landmark models were trained on.
flip_ud = False
flip_lr = False
ThreeChamberLoc.normalize_predictions = True # Smooth predictions throughout cardiac cycle
ThreeChamberLoc.test_time_augmentations = True # Use test time augmentations to improve predictions
ThreeChamberLoc.number_of_test_augmentations = 5 # Number of test-time predictions
ThreeChamberLoc.flip_ud = flip_ud
ThreeChamberLoc.flip_lr = flip_lr
# Generate Input
ThreeChamberLoc.generate_mapping_dictionary()
ThreeChamberLoc.generate_complete_volume()
# Make predictions for ED phase
ThreeChamberLoc.predict_landmarks(prediction_phase=0)
ed_output = ThreeChamberLoc.output
# Make predictions for ES phase (fractional ES phase truncated to a frame index)
ThreeChamberLoc.predict_landmarks(prediction_phase = int(es_phase_float * ThreeChamberLoc.num_phases))
es_output = ThreeChamberLoc.output
%matplotlib inline
# plot 3CH predictions
# Each entry of ed_output is (slice_id, view, time_frame, MV1, MV2, AV1, AV2);
# a landmark is a [row, col] pair or None when no prediction was made.
volume = ThreeChamberLoc.volume
cols = 4
rows = int(np.ceil(len(ed_output) / cols))
plt.figure(figsize=(16, 4 * rows))
for i, entry in enumerate(ed_output):  # avoid shadowing the builtin `slice`
    slice_id = entry[0]
    landmarks = entry[3:7]  # MV1, MV2, AV1, AV2
    image = volume[slice_id, 0, :, :, 0]  # phase 0 == end-diastole
    ax = plt.subplot(rows, cols, i + 1)
    ax.set_title(slice_id)
    # Mirror the image the same way the model input was mirrored so the
    # predicted points line up with the displayed image.
    if flip_ud:
        image = np.flipud(image)
    elif flip_lr:
        image = np.fliplr(image)
    ax.imshow(image, cmap='gray')
    # Explicit None check instead of the original bare try/except, which
    # would also have hidden genuine plotting errors.
    for p in landmarks:
        if p is not None:
            ax.scatter(x=p[1], y=p[0])
plt.suptitle('End-Diastolic Three Chamber')
plt.show()
# Same review figure for the end-systolic frame of the three-chamber views.
plt.figure(figsize=(16, 4 * rows))
es_frame = int(es_phase_float * ThreeChamberLoc.num_phases)  # ES frame index
for i, entry in enumerate(es_output):  # avoid shadowing the builtin `slice`
    slice_id = entry[0]
    landmarks = entry[3:7]  # MV1, MV2, AV1, AV2
    image = volume[slice_id, es_frame, :, :, 0]
    ax = plt.subplot(rows, cols, i + 1)
    ax.set_title(slice_id)
    # Mirror the image the same way the model input was mirrored so the
    # predicted points line up with the displayed image.
    if flip_ud:
        image = np.flipud(image)
    elif flip_lr:
        image = np.fliplr(image)
    ax.imshow(image, cmap='gray')
    # Explicit None check instead of the original bare try/except, which
    # would also have hidden genuine plotting errors.
    for p in landmarks:
        if p is not None:
            ax.scatter(x=p[1], y=p[0])
plt.suptitle('End-Systolic Three Chamber')
plt.show()
# SELECT ONE 3CH SLICE TO INCLUDE IN THE MODEL
# One checkbox per available three-chamber slice, pre-checked by default.
options = list(slice_info_df[slice_info_df['View'] == '3CH']['Slice ID'])
items = []
for sid in options:
    items.append(widgets.Checkbox(
        value=True,
        description='Slice ID {}'.format(sid),
        disabled=False,
        indent=False))
grid = widgets.GridBox(items, layout=widgets.Layout(grid_template_columns="repeat(1, 100px)"))
grid
# Record the selected 3CH slice and its landmark predictions.
selected_3ch_locations = [
    int(box.description.split(' ')[-1]) for box in items if box.value
]
three_ch_cols = ['Slice ID', 'View', 'Time Frame', 'MV1', 'MV2', 'AV1', 'AV2']
# pd.concat replaces DataFrame.append, which is deprecated and removed in
# pandas >= 2.0; behavior (row-wise stacking) is identical.
three_chamber_df = pd.concat([
    pd.DataFrame(ed_output, columns=three_ch_cols),
    pd.DataFrame(es_output, columns=three_ch_cols),
])
three_chamber_df = three_chamber_df[three_chamber_df['Slice ID'].isin(selected_3ch_locations)]
three_chamber_df.reset_index(inplace=True, drop=True)
three_chamber_df
| Slice ID | View | Time Frame | MV1 | MV2 | AV1 | AV2 | |
|---|---|---|---|---|---|---|---|
| 0 | 5 | 3CH | 0 | [121, 124] | [142, 135] | [122, 116] | [115, 127] |
| 1 | 5 | 3CH | 12 | [127, 136] | [141, 144] | [120, 119] | [123, 126] |
# if necessary, update landmark predictions
from annotations import Annotate
# Settings
index = 1 # row index within three_chamber_df to correct (here 0 = ED row, 1 = ES row)
slice_id = 5 # Slice ID of the view being corrected
labels = ['MV1', 'MV2'] # should be list of labels (e.g., ['AV1', 'AV2'])
annotator = Annotate(three_chamber_df, volume, index, slice_id, labels)
%matplotlib notebook
# Click twice on the figure to draw a line across the valve plane (one click
# per insertion point), then run annotator.update() to write the corrected
# points back into the dataframe.
annotator.plot()
# update dataframe and display
three_chamber_df = annotator.update()
three_chamber_df
| Slice ID | View | Time Frame | MV1 | MV2 | AV1 | AV2 | |
|---|---|---|---|---|---|---|---|
| 0 | 5 | 3CH | 0 | [126, 128] | [136, 133] | [112, 126] | [122, 120] |
| 1 | 5 | 3CH | 12 | [125, 135] | [134, 140] | [110, 132] | [121, 125] |
Predict the PV insertion points from the RVOT view.
# Initiate a landmark localization module in the RVOT view
RVOTLoc = LandmarkLocalization( os.path.join(dst, patient) , slice_info_df, 'RVOT' )
# Load tensorflow model
RVOTLoc.load_tensorflow_model()
# Settings
RVOTLoc.num_phases = num_phases # Change number of phases if necessary
RVOTLoc.normalize_predictions = True # Smooth predictions throughout cardiac cycle
RVOTLoc.test_time_augmentations = True # Use test time augmentations to improve predictions
RVOTLoc.number_of_test_augmentations = 5 # Number of test-time predictions
# Generate input
RVOTLoc.generate_mapping_dictionary()
RVOTLoc.generate_complete_volume()
# Make predictions for ED phase
RVOTLoc.predict_landmarks(prediction_phase=0)
ed_output = RVOTLoc.output
# Make predictions for ES phase (fractional ES phase truncated to a frame index)
RVOTLoc.predict_landmarks(prediction_phase= int(es_phase_float * RVOTLoc.num_phases))
es_output = RVOTLoc.output
# display landmark predictions
%matplotlib inline
# Display the end-diastolic RVOT predictions.
# Each entry of ed_output is (slice_id, view, time_frame, PV1, PV2); a landmark
# is a [row, col] pair or None when no prediction was made.
volume = RVOTLoc.volume
cols = 4
rows = int(np.ceil(len(ed_output) / cols))
plt.figure(figsize=(16, 4 * rows))
for i, entry in enumerate(ed_output):  # avoid shadowing the builtin `slice`
    slice_id = entry[0]
    image = volume[slice_id, 0, :, :, 0]  # phase 0 == end-diastole
    ax = plt.subplot(rows, cols, i + 1)
    ax.set_title(slice_id)
    ax.imshow(image, cmap='gray')
    # Explicit None check instead of the original bare try/except, which
    # would also have hidden genuine plotting errors.
    for p in (entry[3], entry[4]):  # PV1, PV2
        if p is not None:
            ax.scatter(x=p[1], y=p[0])
plt.show()
# Same review figure for the end-systolic frame of the RVOT views.
plt.figure(figsize=(16, 4 * rows))
es_frame = int(es_phase_float * RVOTLoc.num_phases)  # ES frame index
for i, entry in enumerate(es_output):  # avoid shadowing the builtin `slice`
    slice_id = entry[0]
    image = volume[slice_id, es_frame, :, :, 0]
    ax = plt.subplot(rows, cols, i + 1)
    ax.set_title(slice_id)
    ax.imshow(image, cmap='gray')
    # Explicit None check instead of the original bare try/except, which
    # would also have hidden genuine plotting errors.
    for p in (entry[3], entry[4]):  # PV1, PV2
        if p is not None:
            ax.scatter(x=p[1], y=p[0])
plt.show()
# SELECT ONE RVOT VIEW TO INCLUDE IN THE MODEL
# One (initially unchecked) checkbox per available RVOT slice.
options = list(slice_info_df[slice_info_df['View'] == 'RVOT']['Slice ID'])
items = []
for sid in options:
    items.append(widgets.Checkbox(
        value=False,
        description='Slice ID {}'.format(sid),
        disabled=False,
        indent=False))
grid = widgets.GridBox(items, layout=widgets.Layout(grid_template_columns="repeat(1, 100px)"))
grid
# Record the selected RVOT slice and its PV insertion predictions.
selected_rvot_locations = [
    int(box.description.split(' ')[-1]) for box in items if box.value
]
rvot_cols = ['Slice ID', 'View', 'Time Frame', 'PV1', 'PV2']
# pd.concat replaces DataFrame.append, which is deprecated and removed in
# pandas >= 2.0; behavior (row-wise stacking) is identical.
rvot_df = pd.concat([pd.DataFrame(ed_output, columns=rvot_cols),
                     pd.DataFrame(es_output, columns=rvot_cols)])
rvot_df = rvot_df[rvot_df['Slice ID'].isin(selected_rvot_locations)]
rvot_df.reset_index(inplace=True, drop=True)
rvot_df
| Slice ID | View | Time Frame | PV1 | PV2 | |
|---|---|---|---|---|---|
| 0 | 3 | RVOT | 0 | [112, 95] | [126, 99] |
| 1 | 3 | RVOT | 12 | [114, 92] | [123, 100] |
If necessary, update the PV insertion point predictions.
# save all dataframes to csv
# One row per (slice, phase); columns not applicable to a view are left NaN.
landmarks_df = pd.concat([sa_df, four_chamber_df, three_chamber_df, rvot_df])
# os.path.join accepts multiple components; no need to nest calls.
landmarks_df.to_csv(os.path.join(dst, patient, 'landmark_points.csv'))
landmarks_df
| Slice ID | View | Time Frame | RV1 | RV2 | MV1 | MV2 | TV1 | TV2 | LVA | AV1 | AV2 | PV1 | PV2 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 12 | SA | 0 | [107, 107] | [129, 120] | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 1 | 17 | SA | 0 | [108, 108] | [135, 115] | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 2 | 20 | SA | 0 | [107, 107] | [138, 115] | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 3 | 12 | SA | 12 | [110, 99] | [123, 114] | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 4 | 17 | SA | 12 | [110, 107] | [127, 109] | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 5 | 20 | SA | 12 | [107, 105] | [133, 107] | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 0 | 9 | 4CH | 0 | NaN | NaN | [136, 127] | [154, 136] | [131, 129] | [116, 116] | [120, 174] | NaN | NaN | NaN | NaN |
| 1 | 9 | 4CH | 12 | NaN | NaN | [135, 129] | [149, 141] | [130, 129] | [115, 120] | [118, 170] | NaN | NaN | NaN | NaN |
| 0 | 5 | 3CH | 0 | NaN | NaN | [126, 128] | [136, 133] | NaN | NaN | NaN | [112, 126] | [122, 120] | NaN | NaN |
| 1 | 5 | 3CH | 12 | NaN | NaN | [125, 135] | [134, 140] | NaN | NaN | NaN | [110, 132] | [121, 125] | NaN | NaN |
| 0 | 3 | RVOT | 0 | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | [112, 95] | [126, 99] |
| 1 | 3 | RVOT | 12 | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | [114, 92] | [123, 100] |
We do not include landmark points from the two-chamber views; however, it is still optimal to include the segmentations from these views in the final model. If available, select one two-chamber left and one two-chamber right view for inclusion in the model.
%matplotlib inline
# select two-chamber right view, if available
# NOTE(review): `volume` here is still RVOTLoc.volume from the cell above;
# this relies on the generated volume containing every slice ID, not just
# RVOT slices — confirm against LandmarkLocalization.generate_complete_volume.
two_chamber_df = slice_info_df[slice_info_df['View'] == '2CH RT'].reset_index()
cols = 4
rows = 1
plt.figure(figsize=(16, 4 * rows))
for idx, record in two_chamber_df.iterrows():
    sid = record['Slice ID']
    ax = plt.subplot(rows, cols, idx + 1)
    ax.set_title(sid)
    ax.imshow(volume[sid, 0, :, :, 0], cmap='gray')
plt.show()
# One checkbox per available two-chamber-right slice, pre-checked by default.
options = list(slice_info_df[slice_info_df['View'] == '2CH RT']['Slice ID'])
items = []
for sid in options:
    items.append(widgets.Checkbox(
        value=True,
        description='Slice ID {}'.format(sid),
        disabled=False,
        indent=False))
grid = widgets.GridBox(items, layout=widgets.Layout(grid_template_columns="repeat(1, 100px)"))
grid
# save selected 2CH RT locations
selected_2ch_rt_locations = [
    int(box.description.split(' ')[-1]) for box in items if box.value
]
# Record both the ED (frame 0) and ES frames for each selected slice; no
# landmarks are predicted for two-chamber views.
out = [frame_row
       for loc in selected_2ch_rt_locations
       for frame_row in ([loc, '2CH RT', 0], [loc, '2CH RT', es_phase])]
two_chamber_rt_df = pd.DataFrame(out, columns=['Slice ID', 'View', 'Time Frame'])
two_chamber_rt_df
| Slice ID | View | Time Frame | |
|---|---|---|---|
| 0 | 11 | 2CH RT | 0 |
| 1 | 11 | 2CH RT | 12 |
# select two-chamber left views, if available
two_chamber_lt_df = slice_info_df[slice_info_df['View'] == '2CH LT'].reset_index()
cols = 4
rows = 1
plt.figure(figsize=(16, 4 * rows))
for idx, record in two_chamber_lt_df.iterrows():
    sid = record['Slice ID']
    ax = plt.subplot(rows, cols, idx + 1)
    ax.set_title(sid)
    ax.imshow(volume[sid, 0, :, :, 0], cmap='gray')
plt.show()
<Figure size 1152x288 with 0 Axes>
# One checkbox per available two-chamber-left slice, pre-checked by default.
options = list(slice_info_df[slice_info_df['View'] == '2CH LT']['Slice ID'])
items = []
for sid in options:
    items.append(widgets.Checkbox(
        value=True,
        description='Slice ID {}'.format(sid),
        disabled=False,
        indent=False))
grid = widgets.GridBox(items, layout=widgets.Layout(grid_template_columns="repeat(1, 100px)"))
grid
# save selected 2CH LT locations
selected_2ch_lt_locations = []
for i in range(len(items)):
    value = items[i].value
    if value == True:
        selected_2ch_lt_locations.append(int(items[i].description.split(' ')[-1]))
# Record both the ED (frame 0) and ES frames for each selected slice.
out = []
for loc in selected_2ch_lt_locations:
    # NOTE(review): the view label here is '2CHLT' (no space) while the RT cell
    # uses '2CH RT' and slice_info_df uses '2CH LT' — confirm which spelling
    # downstream consumers expect before normalizing.
    out.append([loc, '2CHLT', 0])
    out.append([loc, '2CHLT', es_phase])
two_chamber_lt_df = pd.DataFrame(out, columns=['Slice ID', 'View', 'Time Frame'])
two_chamber_lt_df
| Slice ID | View | Time Frame |
|---|
Myocardial segmentation is performed using nnUnet. Double check that the input and output folders are correct below.
# define I/O parameters for nnUnet segmentation
input_folder = "../data/final/" + patient
output_folder = "../data/segmentations/" + patient
# makedirs creates missing parent directories too, and exist_ok avoids the
# check-then-create race that os.mkdir after os.path.exists had.
os.makedirs(output_folder, exist_ok=True)
# default nnUnet parameters
folds = 0                       # which trained fold to use for inference
save_npz = False                # don't save softmax probability maps
num_threads_preprocessing = 1
num_threads_nifti_save = 1
lowres_segmentations = None     # no cascaded low-resolution stage
part_id = 0                     # part index; 0 of num_parts (was False, which nnUnet coerces to 0)
num_parts = 1
mode = "normal"
For use with the nnUnet pipeline (the model used for segmentation), we need to convert each of the relevant image files to a NIFTI file. The code below uses SimpleITK to accomplish this task.
import SimpleITK as sitk

# ensure directory for nii files exists (makedirs also creates missing parents;
# exist_ok replaces the isdir check)
os.makedirs(input_folder, exist_ok=True)

# generate input volume containing every slice/phase for this patient
ShortAxisLoc = LandmarkLocalization(os.path.join(dst, patient), slice_info_df, 'SA')
ShortAxisLoc.num_phases = num_phases  # Change number of phases if necessary
ShortAxisLoc.generate_mapping_dictionary()
ShortAxisLoc.generate_complete_volume()
volume = ShortAxisLoc.volume

# select ED and ES images for each cardiac view, and convert to nifti
view_dataframes = [sa_df, four_chamber_df, three_chamber_df, rvot_df, two_chamber_rt_df, two_chamber_lt_df]
views = ['SA', '4CH', '3CH', 'RVOT', 'RVT', '2CHLT']
for view_idx, df in enumerate(view_dataframes):
    # ensure view folder exists
    view_folder = os.path.join(input_folder, views[view_idx])
    os.makedirs(view_folder, exist_ok=True)
    for _, row in df.iterrows():
        slice_id = row['Slice ID']
        phase = row['Time Frame']
        # extract image array from volume of all slices
        image_array = volume[slice_id, phase, :, :, 0]
        # expand dimensions to (modality, z, y, x)
        image_array = image_array[None, None]
        # spacing defined here - could set to pixel spacing instead of 1,1. First value
        # is simply a dummy value since we only have 2D, just needs to be larger than
        # the x and y spacing so nnUNet treats z as the low-resolution axis.
        spacing = (999, 1, 1)
        for j, modality in enumerate(image_array):  # keep loop in case additional slices/modalities are needed later
            itk_image = sitk.GetImageFromArray(modality)
            itk_image.SetSpacing(list(spacing)[::-1])
            # write nii files for nnUnet - only ED and ES of the selected views/slices
            # need predictions. nnUNet requires the modality suffix (_0000.nii.gz).
            sitk.WriteImage(itk_image, os.path.join(view_folder,
                            '{}_{}_{}'.format(views[view_idx], slice_id, phase)
                            + "_%04.0d.nii.gz" % j))
In the below code, we will iterate through each of the cardiac views and use the appropriate nnUnet model to make predictions. Models are saved under models/Segmentation/ and are organized by task (i.e., view).
# reset gpu memory (necessary if tensorflow/landmark models were loaded previously)
# ONLY RUN THIS ONCE OR THE KERNEL MAY CRASH
tf.keras.backend.clear_session()
torch.cuda.empty_cache()
from numba import cuda
# cuda.close() tears down the CUDA context on device 0 so the memory
# TensorFlow reserved is released for the nnUNet (PyTorch) inference below.
# NOTE(review): after cuda.close() the context is gone for this process;
# running TensorFlow inference again afterwards will likely fail — hence the
# run-once warning above.
cuda.select_device(0)
cuda.close()
Change the model folder to the absolute path for the segmentation models (e.g., "/home/ubuntu/CAP/CAP-FullAutomation/models/Segmentation").
model_folder = "/home/ubuntu/CAP/CAP-FullAutomation/models/Segmentation"
tasks = ['Task101_SAX', 'Task108_4CH', 'Task107_3CH', 'Task109_RVOT', 'Task110_RVT', 'Task106_2CH']
views = ['SA', '4CH', '3CH', 'RVOT', 'RVT', '2CHLT']
# loop through each view and run the matching nnUNet task
for i, view in enumerate(views):
    print('*** Making predictions for {} images ***'.format(view))
    # Define the trained model to use (specified by the task). Built from
    # model_folder so the path configured above is actually honored — the
    # previous hard-coded "../models/Segmentation/..." ignored it.
    model_folder_name = os.path.join(model_folder, tasks[i], "nnUNetTrainerV2__nnUNetPlansv2.1")
    view_input_folder = os.path.join(input_folder, view)
    view_output_folder = os.path.join(output_folder, view)
    # ensure output folder exists (parents included, no race with exist_ok)
    os.makedirs(view_output_folder, exist_ok=True)
    if len(os.listdir(view_input_folder)) > 0:
        # run nnUnet inference directly from the folder of 2D nii files; the
        # positional True (was `not False`) enables test-time mirroring
        predict_from_folder(model_folder_name, view_input_folder, view_output_folder, folds,
                            save_npz, num_threads_preprocessing, num_threads_nifti_save,
                            lowres_segmentations, part_id, num_parts, True, mode=mode)
    print('Done with {}\n'.format(view))
*** Making predictions for SA images ***
This model expects 1 input modalities for each image
Found 6 unique case ids, here are some examples: ['SA_20_0' 'SA_12_12' 'SA_12_0' 'SA_17_12' 'SA_17_0' 'SA_17_0']
If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc
emptying cuda cache
loading parameters for folds, 0
using the following model files: ['../models/Segmentation/Task101_SAX/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/model_final_checkpoint.model']
starting preprocessing generator
starting prediction...
preprocessing ../data/segmentations/CHD1055301/SA/SA_12_0.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
preprocessing ../data/segmentations/CHD1055301/SA/SA_12_12.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
preprocessing ../data/segmentations/CHD1055301/SA/SA_17_0.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
preprocessing ../data/segmentations/CHD1055301/SA/SA_17_12.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
preprocessing ../data/segmentations/CHD1055301/SA/SA_20_0.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
preprocessing ../data/segmentations/CHD1055301/SA/SA_20_12.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
This worker has ended successfully, no errors to report
predicting ../data/segmentations/CHD1055301/SA/SA_12_0.nii.gz
debug: mirroring True mirror_axes (0, 1)
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
force_separate_z: None interpolation order: predicting ../data/segmentations/CHD1055301/SA/SA_12_12.nii.gz
debug: mirroring True mirror_axes (0, 1)
predicting ../data/segmentations/CHD1055301/SA/SA_17_0.nii.gz
1
separate z: True lowres axis [0]
separate z, order in z is 0
order inplane is 1force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
debug: mirroring True mirror_axes (0, 1)
predicting ../data/segmentations/CHD1055301/SA/SA_17_12.nii.gz
debug: mirroring True mirror_axes (0, 1)
predicting ../data/segmentations/CHD1055301/SA/SA_20_0.nii.gz
debug: mirroring True mirror_axes (0, 1)
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
predicting ../data/segmentations/CHD1055301/SA/SA_20_12.nii.gz
debug: mirroring True mirror_axes (0, 1)
inference done. Now waiting for the segmentation export to finish...
WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run consolidate_folds in the output folder of the model first!
The folder you need to run this in is ../models/Segmentation/Task101_SAX/nnUNetTrainerV2__nnUNetPlansv2.1/
Done with SA
*** Making predictions for 4CH images ***
This model expects 1 input modalities for each image
Found 2 unique case ids, here are some examples: ['4CH_9_0' '4CH_9_12']
If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc
emptying cuda cache
loading parameters for folds, 0
using the following model files: ['../models/Segmentation/Task108_4CH/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/model_final_checkpoint.model']
starting preprocessing generator
starting prediction...
preprocessing ../data/segmentations/CHD1055301/4CH/4CH_9_0.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
preprocessing ../data/segmentations/CHD1055301/4CH/4CH_9_12.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
This worker has ended successfully, no errors to report
predicting ../data/segmentations/CHD1055301/4CH/4CH_9_0.nii.gz
debug: mirroring True mirror_axes (0, 1)
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is predicting ../data/segmentations/CHD1055301/4CH/4CH_9_12.nii.gz
debug: mirroring True mirror_axes (0, 1)
inference done. Now waiting for the segmentation export to finish...
0 order inplane is 1
WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run consolidate_folds in the output folder of the model first!
The folder you need to run this in is ../models/Segmentation/Task108_4CH/nnUNetTrainerV2__nnUNetPlansv2.1/
Done with 4CH
*** Making predictions for 3CH images ***
This model expects 1 input modalities for each image
Found 2 unique case ids, here are some examples: ['3CH_5_12' '3CH_5_0']
If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc
emptying cuda cache
loading parameters for folds, 0
using the following model files: ['../models/Segmentation/Task107_3CH/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/model_final_checkpoint.model']
starting preprocessing generator
starting prediction...
preprocessing ../data/segmentations/CHD1055301/3CH/3CH_5_0.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
preprocessing ../data/segmentations/CHD1055301/3CH/3CH_5_12.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
This worker has ended successfully, no errors to report
predicting ../data/segmentations/CHD1055301/3CH/3CH_5_0.nii.gz
debug: mirroring True mirror_axes (0, 1)
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 predicting ../data/segmentations/CHD1055301/3CH/3CH_5_12.nii.gz
debug: mirroring True mirror_axes (0, 1)
inference done. Now waiting for the segmentation export to finish...
order inplane is 1
WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run consolidate_folds in the output folder of the model first!
The folder you need to run this in is ../models/Segmentation/Task107_3CH/nnUNetTrainerV2__nnUNetPlansv2.1/
Done with 3CH
*** Making predictions for RVOT images ***
This model expects 1 input modalities for each image
Found 2 unique case ids, here are some examples: ['RVOT_3_12' 'RVOT_3_0']
If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc
emptying cuda cache
loading parameters for folds, 0
using the following model files: ['../models/Segmentation/Task109_RVOT/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/model_final_checkpoint.model']
starting preprocessing generator
starting prediction...
preprocessing ../data/segmentations/CHD1055301/RVOT/RVOT_3_0.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
preprocessing ../data/segmentations/CHD1055301/RVOT/RVOT_3_12.nii.gz
using preprocessor PreprocessorFor2D
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.328125, 1.328125]), 'data.shape (data is resampled)': (1, 1, 193, 193)}
normalization...
normalization done
(1, 1, 193, 193)
This worker has ended successfully, no errors to report
predicting ../data/segmentations/CHD1055301/RVOT/RVOT_3_0.nii.gz
debug: mirroring True mirror_axes (0, 1)
predicting ../data/segmentations/CHD1055301/RVOT/RVOT_3_12.nii.gz
debug: mirroring True mirror_axes (0, 1)
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
inference done. Now waiting for the segmentation export to finish...
WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run consolidate_folds in the output folder of the model first!
The folder you need to run this in is ../models/Segmentation/Task109_RVOT/nnUNetTrainerV2__nnUNetPlansv2.1/
Done with RVOT
*** Making predictions for RVT images ***
This model expects 1 input modalities for each image
Found 2 unique case ids, here are some examples: ['RVT_11_0' 'RVT_11_0']
If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc
emptying cuda cache
loading parameters for folds, 0
using the following model files: ['../models/Segmentation/Task110_RVT/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0/model_final_checkpoint.model']
starting preprocessing generator
starting prediction...
preprocessing ../data/segmentations/CHD1055301/RVT/RVT_11_0.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.40625, 1.40625]), 'data.shape (data is resampled)': (1, 1, 182, 182)}
normalization...
normalization done
(1, 1, 182, 182)
preprocessing ../data/segmentations/CHD1055301/RVT/RVT_11_12.nii.gz
using preprocessor PreprocessorFor2D
before crop: (1, 1, 256, 256) after crop: (1, 1, 256, 256) spacing: [999. 1. 1.]
separate z, order in z is 0 order inplane is 3
separate z, order in z is 0 order inplane is 1
before: {'spacing': array([999., 1., 1.]), 'spacing_transposed': array([999., 1., 1.]), 'data.shape (data is transposed)': (1, 1, 256, 256)}
after: {'spacing': array([999. , 1.40625, 1.40625]), 'data.shape (data is resampled)': (1, 1, 182, 182)}
normalization...
normalization done
(1, 1, 182, 182)
This worker has ended successfully, no errors to report
predicting ../data/segmentations/CHD1055301/RVT/RVT_11_0.nii.gz
debug: mirroring True mirror_axes (0, 1)
force_separate_z: None interpolation order: 1
separate z: True lowres axis [0]
separate z, order in z is 0 order inplane is 1
force_separate_z: None interpolation order: 1
separate z: True lowres axispredicting ../data/segmentations/CHD1055301/RVT/RVT_11_12.nii.gz
debug: mirroring True mirror_axes (0, 1)
inference done. Now waiting for the segmentation export to finish...
[0]
separate z, order in z is 0 order inplane is 1
WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run consolidate_folds in the output folder of the model first!
The folder you need to run this in is ../models/Segmentation/Task110_RVT/nnUNetTrainerV2__nnUNetPlansv2.1/
Done with RVT
*** Making predictions for 2CHLT images ***
# display generated segmentations
import nibabel as nib
%matplotlib inline
# view some of the segmentations
plt.figure(figsize=(16,24))
count = 1
for i, view in enumerate(views):
view_output_folder = os.path.join(output_folder, view)
view_input_folder = os.path.join(input_folder, view)
segs = os.listdir(view_output_folder)
segs = [x for x in segs if '.nii.gz' in x]
if len(segs) > 0:
# load first image
for idx in range(len(segs)):
subplot = count
# change input name to include modality info (nnUnet formatting)
prefix = segs[idx].split('.')
input_name = prefix[0] + '_0000.' + prefix[1] + '.' + prefix[2]
seg = nib.load(os.path.join(view_output_folder, segs[idx]))
img = nib.load(os.path.join(view_input_folder, input_name))
ax = plt.subplot(6,4,subplot)
ax.imshow(np.array(img.dataobj).T[0,:,:], cmap='gray')
ax.imshow(np.array(seg.dataobj).T[0,:,:], alpha=0.3, cmap='inferno')
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(segs[0])
count += 1
plt.show()
Processes the segmentations and landmarks by extracting contours for inclusion in the guide point file. As a part of this process, we perform an inverse transform from image to model coordinates using the affine matrix provided in the DICOM header. Landmark points are loaded from the csv file that was saved previously. To display guide points as they are generated, set display = True in the .extract_guidepoints() function.
def parse_string(s):
    """Parse a landmark coordinate cell serialized to CSV back into a point.

    Handles the two string encodings produced when the landmarks DataFrame
    was saved: plain bracketed pairs such as ``'[107, 107]'`` and numpy
    reprs such as ``'(array([110]), array([99]))'``.

    Returns:
        A pair of 0-d ``np.int64`` arrays for parseable coordinate strings;
        the input unchanged for anything else (plain labels, NaN floats),
        matching pandas' converter contract.
    """
    if not isinstance(s, str):
        # NaN cells arrive as floats; pass them through untouched.
        return s
    try:
        if ',' in s and 'array' not in s:
            p1 = int(s.split('[')[1].split(',')[0])
            p2 = int(s.split('[')[1].split(',')[1].rstrip(']'))
            return np.array(p1, dtype=np.int64), np.array(p2, dtype=np.int64)
        elif 'array' in s:
            p1 = int(s.split('array([')[1].split(']')[0])
            p2 = int(s.split('array([')[2].split('])')[0])
            return np.array(p1, dtype=np.int64), np.array(p2, dtype=np.int64)
        else:
            return s
    except (IndexError, ValueError):
        # Narrowed from a bare except: malformed strings (e.g. '(107, 107)'
        # tuples without brackets) fall back to the raw value rather than
        # aborting the whole CSV load.
        return s
# Reload the landmark table saved earlier in the pipeline, if necessary.
_landmarks_csv = os.path.join('/home/ubuntu/CAP/CAP-FullAutomation/data/processed',
                              patient, 'landmark_points.csv')
landmarks_df = pd.read_csv(_landmarks_csv, converters={'object': parse_string})
# Drop the stale CSV index column and decode every serialized coordinate cell.
landmarks_df = landmarks_df.drop(columns=['Unnamed: 0']).applymap(parse_string)
landmarks_df
| Slice ID | View | Time Frame | RV1 | RV2 | MV1 | MV2 | TV1 | TV2 | LVA | AV1 | AV2 | PV1 | PV2 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 12 | SA | 0 | (107, 107) | (129, 120) | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 1 | 17 | SA | 0 | (108, 108) | (135, 115) | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 2 | 20 | SA | 0 | (107, 107) | (138, 115) | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 3 | 12 | SA | 12 | (110, 99) | (123, 114) | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 4 | 17 | SA | 12 | (110, 107) | (127, 109) | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 5 | 20 | SA | 12 | (107, 105) | (133, 107) | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| 6 | 9 | 4CH | 0 | NaN | NaN | (136, 127) | (154, 136) | (131, 129) | (116, 116) | (120, 174) | NaN | NaN | NaN | NaN |
| 7 | 9 | 4CH | 12 | NaN | NaN | (135, 129) | (149, 141) | (130, 129) | (115, 120) | (118, 170) | NaN | NaN | NaN | NaN |
| 8 | 5 | 3CH | 0 | NaN | NaN | (126, 128) | (136, 133) | NaN | NaN | NaN | (112, 126) | (122, 120) | NaN | NaN |
| 9 | 5 | 3CH | 12 | NaN | NaN | (125, 135) | (134, 140) | NaN | NaN | NaN | (110, 132) | (121, 125) | NaN | NaN |
| 10 | 3 | RVOT | 0 | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | (112, 95) | (126, 99) |
| 11 | 3 | RVOT | 12 | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | (114, 92) | (123, 100) |
Provide the absolute paths to the image folder (containing the .nii inputs), the segmentation folder (containing the .nii segmentations produced by nnUNet), and the final output folder.
from guidepointprocessing import GuidePointProcessing
# I/O locations for the nnUNet segmentation -> guide point extraction step.
_data_root = "/home/ubuntu/CAP/CAP-FullAutomation/data/"
image_folder = _data_root + "final/" + patient
segment_folder = _data_root + "segmentations/" + patient
output_folder = _data_root + "processed/" + patient
GuidePoints = GuidePointProcessing(patient, image_folder, segment_folder,
                                   output_folder, slice_info_df, landmarks_df)
# display=True plots guide points as they are generated.
GuidePoints.extract_guidepoints(display=True)
Guide point file already exists at /home/ubuntu/CAP/CAP-FullAutomation/data/processed/CHD1055301/GP_ED.txt Removing file, and regenerating guide points.. Guide point file already exists at /home/ubuntu/CAP/CAP-FullAutomation/data/processed/CHD1055301/GP_ES.txt Removing file, and regenerating guide points..
12 20
# Display guide point results if desired
from mpl_toolkits import mplot3d
coordinates = []
model_folder = '../data/processed/' + patient
with open(model_folder + '/GP_ES.txt', 'r') as f:
for i,line in enumerate(f.readlines()):
if i > 0:
points = line.split('\t')
coordinates.append([float(points[0]), float(points[1]), float(points[2]), points[3]])
len_colors = len(np.unique(np.array(coordinates)[:,3]))
from matplotlib import cm
new_map = plt.cm.get_cmap('hsv', len_colors)
# define colors for each point
map = set(np.array(coordinates)[:,3])
map_dict = {'AORTA_VALVE': 'r',
'MITRAL_VALVE': 'b',
'RV_INSERT': 'g',
'APEX_POINT': 'y',
'SAX_RV_FREEWALL': '#F2CA19',
'LAX_LV_EPICARDIAL': '#0057E9',
'TRICUSPID_VALVE': 'k',
'LAX_RV_SEPTUM': '#E11845',
'SAX_RV_SEPTUM': '#E11845',
'LAX_RV_EPICARDIAL': '#0057E9',
'LAX_RV_FREEWALL': '#F2CA19',
'PULMONARY_VALVE': 'purple',
'SAX_LV_ENDOCARDIAL': '#87E911',
'SAX_LV_EPICARDIAL': '#0057E9',
'SAX_RV_EPICARDIAL': '#0057E9',
'LAX_LV_ENDOCARDIAL': '#87E911'}
%matplotlib inline
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
labels = []
for p1 in coordinates:
lab = p1[3]
if 'LAX' in lab or 'SAX' in lab:
s = 3
else:
s = 10
if lab in labels:
ax.scatter3D(p1[0], p1[1], p1[2], color=map_dict[lab], s=s)
else:
labels.append(lab)
ax.scatter3D(p1[0], p1[1], p1[2], color=map_dict[lab], label=p1[3], s=s)
plt.legend()
plt.show()
# Move GP and slice info files to the BiV Modelling v2 module.
import shutil
root = '/home/ubuntu/CAP/CAP-FullAutomation/CIM/BiV_Modelling_v2/test_data/'
# makedirs with exist_ok avoids the isdir()/mkdir() check-then-act race,
# creates missing parents, and is consistent with os.path.join usage below
# (the original mixed `root + patient` and os.path.join).
os.makedirs(os.path.join(root, patient), exist_ok=True)
shutil.copyfile(os.path.join(dst, patient, 'GP_ED.txt'), os.path.join(root, patient, 'GP_ED.txt'))
shutil.copyfile(os.path.join(dst, patient, 'GP_ES.txt'), os.path.join(root, patient, 'GP_ES.txt'))
shutil.copyfile(os.path.join(dst, patient, 'SliceInfo.txt'), os.path.join(root, patient, 'SliceInfo.txt'))
'/home/ubuntu/CAP/CAP-FullAutomation/CIM/BiV_Modelling_v2/test_data/CHD9925301/SliceInfo.txt'